/* begincopyright
Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
/*
* September 28, 1990 1:55:36 pm PDT
*/
/* Top level allocation routines, and related client-callable routines. */
#define DEBUG /* Some run-time consistency checks */
#undef DEBUG
#define VERBOSE
#undef VERBOSE
#include <signal.h>
#include "xr/GCPrivate.h"
#include "xr/ThreadsMsg.h"
#define ERROR_RETURN return((XR_Pointer)0)
# ifdef MERGE_SIZES
/* Set things up so that size_map[i] >= i, but not too much bigger */
/* and so that size_map contains relatively few distinct entries */
/* This is stolen from Russ Atkinson's Cedar quantization */
/* algorithm (but we precompute it); a worked example follows the function. */
void GC_init_size_map()
{
register int i;
int i_rounded_up = 0;
/* Map size 0 to 1. This avoids problems at lower levels. */
GC_size_map[0] = 1;
GC_size_map[1] = 1;
for (i = 2; i <= 8; i++) {
# ifdef ALIGN_DOUBLE
GC_size_map[i] = (i + 1) & (~1);
# else
GC_size_map[i] = i;
# endif
}
for (i = 9; i <= MAXOBJSZ; i++) {
if (i_rounded_up < i) {
register int size = i;
register unsigned m = 0;
while (size > 7) {
m += 1;
size += 1;
size >>= 1;
}
i_rounded_up = size << m;
if (i_rounded_up > MAXOBJSZ) {
i_rounded_up = MAXOBJSZ;
}
}
GC_size_map[i] = i_rounded_up;
}
}
# endif
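/* Worked example of the mapping the loop above produces (assuming */
/* ALIGN_DOUBLE is not defined): sizes 0..1 map to 1, sizes 2..8 map to */
/* themselves, 9..10 map to 10, 11..12 to 12, 13..14 to 14, 15..16 to 16, */
/* and 17..20 to 20, so only a few distinct free lists are ever populated. */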
static GC_alloc_call_back_type alloc_call_back = 0;
static XR_Pointer alloc_call_back_data = 0;
void GC_register_alloc_callback(fn, data, Ofn, Odata)
GC_alloc_call_back_type fn;
XR_Pointer data;
GC_alloc_call_back_type *Ofn;
XR_Pointer *Odata;
{
XR_MonitorEntry(&GC_alloc_callback_ml);
if (Ofn != 0) {
*Ofn = alloc_call_back;
}
if (Odata != 0) {
*Odata = alloc_call_back_data;
}
alloc_call_back = fn;
alloc_call_back_data = data;
XR_MonitorExit(&GC_alloc_callback_ml);
return;
}
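/*
 * Illustrative use of the callback hook above.  This is only a sketch:
 * "my_trace_alloc" is hypothetical and not part of this file.  The callback
 * receives the request length in bytes, an atomic flag, and the registered
 * client datum; returning 0 lets GC_malloc / GC_malloc_atomic proceed with
 * the normal allocation path.
 *
 *     static XR_Pointer my_trace_alloc(lb, atomic, data)
 *         int lb; bool atomic; XR_Pointer data;
 *     {
 *         GC_vprintf("alloc request: %d bytes, atomic %d\n", lb, atomic);
 *         return ((XR_Pointer)0);
 *     }
 *
 *     GC_alloc_call_back_type old_fn;
 *     XR_Pointer old_data;
 *     GC_register_alloc_callback(my_trace_alloc, (XR_Pointer)0,
 *                                &old_fn, &old_data);
 */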
/* allocate lb bytes of atomic data */
XR_Pointer GC_malloc_atomic(lb)
int lb;
{
register struct obj *op;
register struct obj **opp;
register long lw = BYTES_TO_WORDS(lb + (sizeof (word)) -1);
/* This should really be installed as a breakpoint. */
if(alloc_call_back != 0) /* Test is only an optimization */ {
XR_MonitorEntry(&GC_alloc_callback_ml);
if(alloc_call_back != 0) {
op = (struct obj *)((*alloc_call_back)
(lb, TRUE, alloc_call_back_data));
if (op != 0) {
XR_MonitorExit(&GC_alloc_callback_ml);
return((XR_Pointer)op);
}
}
XR_MonitorExit(&GC_alloc_callback_ml);
}
XR_MonitorEntry(&GC_allocate_ml);
GC_words_allocd += lw;
GC_objects_allocd++;
if( lw <= MAXOBJSZ ) {
# ifdef MERGE_SIZES
lw = GC_size_map[lw];
# else
if (lw == 0) lw = 1;
# endif
opp = &(aobjfreelist[lw]);
while( (op = *opp) == ((struct obj *)0) ) {
XR_MonitorExit(&GC_allocate_ml);
GC_delay_alloc(HBLKSIZE);
if (!GC_allocaobj(lw)) {
ERROR_RETURN;
}
XR_MonitorEntry(&GC_allocate_ml);
}
# ifdef DEBUG
if ((get_obj_link(op) != ((struct obj *) 0)
&& (((unsigned)get_obj_link(op)) > ((unsigned) HEAPLIM)
|| ((unsigned)get_obj_link(op)) < ((unsigned) HEAPSTART)))) {
GC_abort("Bad free list in gc_malloc_atomic");
}
# endif
*opp = get_obj_link(op);
/* op->obj_component[0] = 0; */
} else {
register struct hblk * h;
XR_MonitorExit(&GC_allocate_ml);
GC_delay_alloc(lb);
XR_MonitorEntry(&GC_allocate_ml);
if (!GC_sufficient_hb(-lw) && !GC_dont_gc) {
GC_reclaim_hblks(-lw);
}
# ifdef VERBOSE
GC_vprintf("gc_malloc_atomic calling allochblk(%x)\n",lw);
# endif
h = GC_allochblk(-lw);
if (h == (struct hblk *)0) {
XR_MonitorExit(&GC_allocate_ml);
ERROR_RETURN;
}
GC_add_hblklist(h);
op = (struct obj *) (h -> hb_body);
}
# ifdef VISUALIZE
displayAlloc(op, lw);
# endif
XR_MonitorExit(&GC_allocate_ml);
return((XR_Pointer) op);
}
/* allocate lb bytes of possibly composite data */
XR_Pointer GC_malloc(lb)
int lb;
{
register struct obj *op;
register struct obj **opp;
register long lw = BYTES_TO_WORDS(lb + (sizeof (word)) -1);
/* This should really be installed as a breakpoint. */
if(alloc_call_back != 0) /* Test is only an optimization */ {
XR_MonitorEntry(&GC_alloc_callback_ml);
if(alloc_call_back != 0) {
op = (struct obj *)((*alloc_call_back)
(lb, FALSE, alloc_call_back_data));
if (op != 0) {
XR_MonitorExit(&GC_alloc_callback_ml);
return((XR_Pointer)op);
}
}
XR_MonitorExit(&GC_alloc_callback_ml);
}
XR_MonitorEntry(&GC_allocate_ml);
GC_words_allocd += lw;
GC_objects_allocd++;
if( lw <= MAXOBJSZ ) {
# ifdef MERGE_SIZES
lw = GC_size_map[lw];
# else
if (lw == 0) lw = 1;
# endif
opp = &(objfreelist[lw]);
while ((op = *opp) == ((struct obj *)0) ) {
XR_MonitorExit(&GC_allocate_ml);
GC_delay_alloc(HBLKSIZE);
if (!GC_allocobj(lw)) {
ERROR_RETURN;
}
XR_MonitorEntry(&GC_allocate_ml);
}
# ifdef DEBUG
if ((get_obj_link(op) != ((struct obj *) 0)
&& (((unsigned)get_obj_link(op)) > ((unsigned) HEAPLIM)
|| ((unsigned)get_obj_link(op)) < ((unsigned) HEAPSTART)))) {
GC_abort("Bad free list in gc_malloc");
}
# endif
*opp = get_obj_link(op);
op->obj_component[0] = 0;
} else {
register struct hblk * h;
XR_MonitorExit(&GC_allocate_ml);
GC_delay_alloc(lb);
XR_MonitorEntry(&GC_allocate_ml);
if (!GC_sufficient_hb(lw) && !GC_dont_gc) {
GC_reclaim_hblks(lw);
}
# ifdef VERBOSE
GC_vprintf("gc_malloc calling allochblk(%x)\n",lw);
# endif
h = GC_allochblk(lw);
if (h == (struct hblk *)0) {
XR_MonitorExit(&GC_allocate_ml);
ERROR_RETURN;
}
GC_add_hblklist(h);
op = (struct obj *) (h -> hb_body);
}
# ifdef VISUALIZE
displayAlloc(op, lw);
# endif
XR_MonitorExit(&GC_allocate_ml);
return((XR_Pointer)op);
}
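/*
 * Minimal usage sketch (hypothetical, not part of this file), assuming the
 * declarations from xr/GCPrivate.h are in scope.  GC_malloc returns storage
 * that the collector scans for pointers; GC_malloc_atomic returns storage
 * treated as pointer-free, so it must never hold the only reference to a
 * collectable object.  GC_free below is optional and trusts the caller to
 * pass the start of the object.
 *
 *     char *text = (char *)GC_malloc_atomic(1024);
 *     XR_Pointer *slots = (XR_Pointer *)GC_malloc(16 * sizeof (XR_Pointer));
 *     ...
 *     GC_free((struct obj *)text);
 */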
/* Explicitly deallocate an object p. We trust the caller to know what he's */
/* doing. */
/* P is assumed to be a pointer to the beginning of the object. */
void GC_free(p)
struct obj *p;
{
register struct hblk *h;
register int sz;
register word * i;
register word * limit;
XR_MonitorEntry(&GC_allocate_ml);
h = HBLKPTR(p);
sz = hb_sz(h);
# ifdef VISUALIZE
displayFree(p, sz);
# endif
if (sz < 0) {
sz = -sz;
if (sz > MAXOBJSZ) {
hb_uninit(h) = 1;
GC_del_hblklist(h);
GC_freehblk(h);
} else {
set_obj_link(p, aobjfreelist[sz]);
aobjfreelist[sz] = p;
}
} else {
if (sz > MAXOBJSZ) {
hb_uninit(h) = 1;
GC_del_hblklist(h);
GC_freehblk(h);
} else {
/* Clear the object, other than link field */
limit = &(p -> obj_component[sz]);
for (i = &(p -> obj_component[1]); i < limit; i++) {
*i = 0;
}
set_obj_link(p, objfreelist[sz]);
objfreelist[sz] = p;
}
}
/* Add it to mem_freed to prevent anomalous heap expansion */
/* in the event of repeated explicit frees of objects of */
/* varying sizes. */
GC_mem_freed += sz;
XR_MonitorExit(&GC_allocate_ml);
}
/* Change the size of the block pointed to by p to contain at least */
/* lb bytes. The object may be (and quite likely will be) moved. */
/* The new object is assumed to be atomic if the original object was. */
/* Shrinking of large blocks is not implemented well. */
XR_Pointer GC_realloc(p,lb)
XR_Pointer p;
int lb;
{
register struct obj *op;
register struct obj **opp;
register struct hblk * h;
register int sz; /* Current size in bytes */
register int orig_sz; /* Original sz in bytes */
bool is_atomic;
h = HBLKPTR(p);
sz = hb_sz(h);
if (sz < 0) {
sz = -sz;
is_atomic = TRUE;
} else {
is_atomic = FALSE;
}
sz = WORDS_TO_BYTES(sz);
orig_sz = sz;
if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
/* Round it up to the next whole heap block */
sz = (sz+HDR_BYTES+HBLKSIZE-1) & (~HBLKMASK);
sz -= HDR_BYTES;
hb_sz(h) = BYTES_TO_WORDS(sz);
/* If this is a composite object, then the remaining part is */
/* already cleared. */
}
if (is_atomic) {
if (lb <= sz) {
if (lb >= (sz >> 1)) {
/* Already big enough, but not too much bigger than object. */
/* Ignore the request. */
/* If sz is big enough, we should probably deallocate */
/* part of the heap block here, but ... */
return(p);
} else {
/* shrink */
XR_Pointer result = GC_malloc_atomic(lb);
bcopy(p, result, lb);
GC_free(p);
return(result);
}
} else {
/* grow */
XR_Pointer result = GC_malloc_atomic(lb);
bcopy(p, result, sz);
GC_free(p);
return(result);
}
} else /* composite */ {
if (lb <= sz) {
if (lb >= (sz >> 1)) {
if (orig_sz > lb) {
/* Clear unneeded part of object to avoid bogus pointer */
/* tracing. */
bzero(((char *)p) + lb, orig_sz - lb);
}
return(p);
} else {
/* shrink */
XR_Pointer result = GC_malloc(lb);
bcopy(p, result, lb);
GC_free(p);
return(result);
}
} else {
/* grow */
XR_Pointer result = GC_malloc(lb);
bcopy(p, result, sz);
GC_free(p);
return(result);
}
}
}
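/*
 * Usage sketch for GC_realloc (hypothetical): use the returned pointer in
 * place of the old one, since growing or substantially shrinking allocates
 * a new object, copies the data, and frees the original.
 *
 *     XR_Pointer q = GC_malloc(100);
 *     q = GC_realloc(q, 400);
 */
/*
 * Note: the XR_StatsVMsg / XR_VerboseVMsg / XR_ErrorVMsg / XR_PanicVMsg
 * forms below are macros (presumably defined in xr/ThreadsMsg.h) that
 * supply their own opening parenthesis, which is why each call shows only
 * the closing ')'.
 */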
void GC_printf(fmt, a, b, c, d, e)
char * fmt;
long a,b,c,d,e;
{
XR_StatsVMsg fmt, a, b, c, d, e);
}
void GC_vprintf(fmt, a, b, c, d, e)
char * fmt;
long a,b,c,d,e;
{
XR_VerboseVMsg fmt, a, b, c, d, e);
}
void GC_iprintf(fmt, a, b, c, d, e)
char * fmt;
long a,b,c,d,e;
{
XR_ErrorVMsg fmt, a, b, c, d, e);
}
void GC_abort(s)
char *s;
{
if (GC_ok_to_panic) {
XR_PanicVMsg "%s\n",s);
XR_Panic(s);
} else {
XR_ErrorVMsg "%s\n foolishly attempting to continue ...\n", s);
}
}
long XR_get_object_size(p)
struct obj *p;
{
struct hblk * h = HBLKPTR(p);
# ifdef INTERIOR_POINTERS
if (!is_hblk(h)) {
char m = get_map(h);
while (m > 0 && m < 0x7f) {
h -= m;
m = get_map(h);
}
}
# endif
return WORDS_TO_BYTES(HB_SIZE(h));
}
/*
* Kludge for peculiarities of valloc counted on by the Sun window system.
* Note that valloc'd pages are assumed to have no pointers!
* Return NIL if the allocation request threatens to cause the heap
* to grow beyond MAP_SIZE * HBLKSIZE. This test is only a heuristic,
* since there may be holes between heap segments, and the limit is
* really on addresses rather than aggregate heap size.
*
*/
#define VALLOC_LIMIT ((MAP_SIZE * HBLKSIZE) - (6*1024*1024))
XR_Pointer XR_valloc(size)
{
unsigned pagesize;
XR_Pointer base, oldbase;
pagesize = getpagesize();
if ((GC_heapsize + size) > VALLOC_LIMIT) ERROR_RETURN;
if (pagesize > HBLKSIZE || HDR_BYTES != 0) {
oldbase = XR_pointerfree_new(size + pagesize);
/* round up to nearest pagesize boundary */
base = (XR_Pointer)(((int)(oldbase + (pagesize - 1))) & (~ (pagesize - 1)));
} else {
oldbase = base = GC_malloc_atomic(size >= pagesize? size : pagesize);
}
XR_make_uncollectable(oldbase, base);
return base;
}
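/*
 * Usage sketch (hypothetical): the result is page-aligned, assumed
 * pointer-free, and pinned via XR_make_uncollectable until XR_valloc_free
 * releases it.
 *
 *     XR_Pointer pg = XR_valloc(8192);
 *     ...
 *     XR_valloc_free(pg);
 */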
/*
* Prevents an indicated object from ever being collected by putting
* it on a linked list rooted in a static variable.
*/
void XR_make_uncollectable(pinaddr, knownAsAddr)
char *pinaddr, *knownAsAddr;
{
struct uncollectable_structure *ptr;
# ifdef PRINT_ALLOCS
GC_vprintf("Making %x (aka %x) uncollectable.\n", pinaddr, knownAsAddr);
# endif
ptr = (struct uncollectable_structure *)
XR_malloc(sizeof(struct uncollectable_structure));
ptr->next = GC_pin_head;
ptr->addr1 = pinaddr;
ptr->addr2 = knownAsAddr;
GC_pin_head = ptr;
}
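/*
 * Pinning sketch (hypothetical): keep a buffer handed to non-GC-aware code
 * alive until it is explicitly released.
 *
 *     char *buf = (char *)XR_pointerfree_new(256);
 *     XR_make_uncollectable(buf, 0);
 *     ...
 *     XR_unmake_uncollectable(buf);
 */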
void
XR_unmake_uncollectable(ptr)
char *ptr;
{
register struct uncollectable_structure *p, *prev;
if (ptr == 0) return;
p = GC_pin_head;
prev = 0;
while (p && p->addr1 != ptr && p->addr2 != ptr) {
prev = p;
p = p->next;
}
if (p) {
if (prev) {
prev->next = p->next;
} else {
GC_pin_head = p->next;
}
# ifdef PRINT_ALLOCS
GC_vprintf("Making %x (aka %x) collectable again.\n", p->addr1, p->addr2);
# endif
p->addr1 = 0;
p->addr2 = 0;
}
}
void XR_valloc_free(ptr)
char *ptr;
{
XR_unmake_uncollectable(ptr);
}
/* The following are considered OBSOLETE, but are provided for backward */
/* compatibility. They may disappear in the future. */
void XR_unsafe_free(p)
XR_Pointer p;
{
XR_free(p);
}
/* Similar to GC_malloc, but provides space for a header. */
XR_Pointer XR_malloc(lb)
long lb;
{
XR_Pointer result = (XR_Pointer)(((long)(GC_malloc(lb+8))) + 8);
if (result == (XR_Pointer)8) {
ERROR_RETURN;
}
return(result);
}
XR_Pointer XR_pointerfree_new(lb)
long lb;
{
XR_Pointer result = (XR_Pointer)(((long)(GC_malloc_atomic(lb+8))) + 8);
if (result == (XR_Pointer)8) {
ERROR_RETURN;
}
return(result);
}
XR_Pointer XR_new(lb)
long lb;
{
return(XR_malloc(lb));
}
XR_Pointer XR_clear_new(i,j)
long i,j;
{
return(XR_malloc(i*j));
}
XR_Pointer XR_ralloc(lw)
unsigned lw;
{
return(XR_pointerfree_new(WORDS_TO_BYTES(lw)));
}
XR_Pointer XR_ralloc_comp(lw)
unsigned lw;
{
return(XR_malloc(WORDS_TO_BYTES(lw)));
}
/* Similar to GC_free, but makes no assumption that p points to the beginning */
/* of the object. */
void XR_free(p)
word *p;
{
register struct hblk * h;
register long word_no;
register unsigned long sz;
h = HBLKPTR(p);
# ifdef INTERIOR_POINTERS
if (!is_hblk(h)) {
char m = get_map(h);
while (m > 0 && m < 0x7f) {
h -= m;
m = get_map(h);
}
}
sz = HB_SIZE(h);
# endif
word_no = WORD_NO(p, h);
# ifdef INTERIOR_POINTERS
word_no = adjusted_word_no(word_no,sz);
# endif
GC_free(((word *)h) + word_no);
}
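/*
 * Round-trip sketch for this OBSOLETE layer (hypothetical): XR_malloc
 * returns a pointer 8 bytes past the start of the underlying GC object, and
 * XR_free locates the containing object (via the heap block map when
 * INTERIOR_POINTERS is defined) before handing it to GC_free.
 *
 *     XR_Pointer q = XR_malloc(40);
 *     ...
 *     XR_free(q);
 */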
XR_Pointer XR_calloc(i,j)
long i,j;
{
return(XR_malloc(i*j));
}
XR_Pointer
XR_realloc(old, size)
register word *old;
long size;
{
register struct hblk * h;
register long word_no;
register unsigned long sz;
register word * old_beg;
register long offset;
h = HBLKPTR(old);
# ifdef INTERIOR_POINTERS
if (!is_hblk(h)) {
char m = get_map(h);
while (m > 0 && m < 0x7f) {
h -= m;
m = get_map(h);
}
}
sz = HB_SIZE(h);
# endif
word_no = WORD_NO(old, h);
# ifdef INTERIOR_POINTERS
word_no = adjusted_word_no(word_no,sz);
# endif
old_beg = ((word *)h) + word_no;
offset = old - old_beg;
return((XR_Pointer)
(((word *)(GC_realloc(old_beg, size + WORDS_TO_BYTES(offset))))
+ offset));
}
/*
XR_Pointer
XR_realloc(old, size)
register word *old;
long size;
{
word *last;
register word *new;
register i;
long copySize;
last = new = (word *)XR_malloc(size);
if (new == (word *)0) { return((XR_Pointer)0); }
copySize = XR_get_object_size( (struct obj *)(old) );
if( copySize > size ) copySize = size;
while( copySize > 0 ) {
*new++ = *old++;
copySize -= sizeof(word);
}
return (XR_Pointer)last;
}
*/
XR_Pointer
XR_pointerfree_clear_new(size)
int size;
{
struct obj *p;
int lw;
size = lw =(size + (sizeof (word)) -1) / (sizeof (word));
p = (struct obj *) XR_ralloc(size);
if (p == (struct obj *)0) {
return((XR_Pointer)0);
}
for (lw--; lw >= 0; lw--) {
p -> obj_component[lw] = 0;
}
return((XR_Pointer)p);
}
/*
* Versions of above routines that traffic in uncollectable
* pointer-free objects. Also considered OBSOLETE.
*/
XR_Pointer
XR_UNCollect_malloc(size)
long size;
{
XR_Pointer base = XR_pointerfree_clear_new(size);
XR_make_uncollectable(base,0);
return base;
}
XR_Pointer
XR_UNCollect_calloc(size_elem, num_elem)
long size_elem, num_elem;
{
XR_Pointer base = XR_pointerfree_clear_new(size_elem * num_elem);
XR_make_uncollectable(base,0);
return base;
}
XR_Pointer
XR_UNCollect_realloc(old, size)
register word *old;
long size;
{
register word *new;
word *new2, *old2;
long copySize;
new = new2 = (word *)XR_pointerfree_clear_new(size);
if (new == (word *)0) { return((XR_Pointer)0); }
XR_make_uncollectable(new,0);
old2 = old; /* remember the original pointer; the copy loop below advances old */
copySize = XR_get_object_size( (struct obj *)(old) );
if( copySize > size ) copySize = size;
while( copySize > 0 ) {
*new++ = *old++;
copySize -= sizeof(word);
}
XR_unmake_uncollectable(old2);
return (XR_Pointer)new2;
}
void
XR_UNCollect_free(ptr)
char *ptr;
{
XR_unmake_uncollectable(ptr);
}
/* The following are OBSOLETE control routines that may still be called
* by some clients.
*/
bool XR_GCGetMiserlyHeap()
{ return(FALSE); }
bool XR_GCSetMiserlyHeap(/* bool */)
{ return(FALSE); }